library(tidyverse)
library(skimr)
library(brms)

Data import and wrangle

# Import the raw style-elongation measurements and complete the design so
# every day x floret x treatment x time combination has a row (absent
# combinations are filled with NA).
style <- read_csv("../input/Styleelongation2017_ZTslh.csv") %>%
  complete(day, floret, treatment, time)

── Column specification ───────────────────────────────────────────────────────────────────────────────────
cols(
  day = col_double(),
  floret = col_double(),
  treatment = col_character(),
  time = col_time(format = ""),
  `length (mm)` = col_double(),
  ZT = col_double()
)
# Shorten column names, convert clock time to elapsed hours, and build a
# unique per-floret series id (floret-day-treatment).
style <- style %>% rename(trt=treatment, length=`length (mm)`) %>%
  mutate(hour=as.numeric(time-min(time)) / 3600,  # elapsed hours, first observation = 0 (difftime is in seconds)
         id=str_c(floret,day,trt,sep="-"))  # unique id for each floret
# Quick overview of the wrangled data; trt as factor so skim tabulates levels.
style %>% mutate(trt=as.factor(trt)) %>% skim()
── Data Summary ────────────────────────
                           Values    
Name                       Piped data
Number of rows             1368      
Number of columns          8         
_______________________              
Column type frequency:               
  character                1         
  difftime                 1         
  factor                   1         
  numeric                  5         
________________________             
Group variables            None      

── Variable type: character ───────────────────────────────────────────────────────────────────────────────
  skim_variable n_missing complete_rate   min   max empty n_unique whitespace
1 id                    0             1     8    12     0       72          0

── Variable type: difftime ────────────────────────────────────────────────────────────────────────────────
  skim_variable n_missing complete_rate min        max        median     n_unique
1 time                  0             1 19800 secs 36000 secs 27900 secs       19

── Variable type: factor ──────────────────────────────────────────────────────────────────────────────────
  skim_variable n_missing complete_rate ordered n_unique top_counts                  
1 trt                   0             1 FALSE          3 Eas: 456, Wes: 456, Wes: 456

── Variable type: numeric ─────────────────────────────────────────────────────────────────────────────────
  skim_variable n_missing complete_rate  mean    sd    p0   p25   p50   p75  p100 hist 
1 day                   0         1      5.12 2.71   1     2.75  5.5   7.25   9   ▇▃▃▇▇
2 floret                0         1      2    0.817  1     1     2     3      3   ▇▁▇▁▇
3 length               45         0.967  8.47 1.37   6.24  7.40  8.07  9.35  13.7 ▇▇▃▁▁
4 ZT                   39         0.971  1.19 1.34  -1     0     1.25  2.25   3.5 ▇▇▆▇▇
5 hour                  0         1      2.25 1.37   0     1     2.25  3.5    4.5 ▇▇▆▇▇
unique(style$trt)
[1] "East"     "West"     "WestHeat"

Plot to see what we have:

# Raw data overview: style length against time of day, one panel per
# measurement day, colored by treatment with floret shown by point shape.
style %>%
  ggplot(aes(x = time, y = length, color = trt, shape = as.character(floret))) +
  facet_wrap(~ day) +
  geom_point()
Warning: Removed 45 rows containing missing values (geom_point).

Remove a few rows with missing data that are not at the end of their series.

# Drop the one interior measurement at day 1 / floret 3 / East / hour 3.5,
# then drop early-series missing lengths (NA before hour 2); remaining NAs
# sit at the ends of series and are treated as censored below.
style <- style %>% filter(day!=1 | floret!=3 | trt!="East" | hour!=3.5) %>%
  filter(!(is.na(length) & hour < 2))

Fix length to the maximum length. This is necessary because our growth model can’t account for shrinkage after reaching the maximum length. Fill in missing end-of-series values, but note that they are censored.

# Flag missing lengths as right-censored, then hold each floret's series at
# its maximum observed length once that maximum has been reached: the growth
# model cannot represent shrinkage, and trailing NAs are filled with the max.
style <- style %>% 
  mutate(censored = ifelse(is.na(length), "right", "none")) %>%
  group_by(id) %>%
  arrange(hour) %>%   # NOTE: arrange() ignores groups, but the global sort by hour also orders rows within each id
  mutate(length2=ifelse((length < max(length, na.rm = TRUE) | is.na(length)) & row_number() > which.max(length),
                       max(length, na.rm = TRUE), length)) %>%  # only rows *after* the first maximum are clamped/filled
  ungroup() %>%
  filter(!is.na(length2))  # any NA length *before* the series maximum is still NA here and gets dropped

Plot smoothed data

# Loess-smoothed length2 trajectories per treatment, faceted by day.
style %>%
  ggplot(aes(x = time, y = length2, color = trt)) +
  facet_wrap(~ day) +
  geom_smooth()
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

And plot averaged over days:

# Same smoothing, but pooled across days.
ggplot(style, aes(x = time, y = length2, color = trt)) +
  geom_smooth()
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

prior predictions

To help figure out reasonable priors

# Weibull growth curve evaluated at the supplied hours.
#
# Rises from Lmin at hour 0 toward the Lmax asymptote; k sets the rate and
# delta the shape (delta = 1 reduces to a simple exponential approach).
# Returns a tibble with columns `hour` and `y` (predicted length).
weibull <- function(Lmin, Lmax, k, delta, hour) {
  remaining <- exp(-(k * hour)^delta)
  tibble(hour = hour, y = Lmax - (Lmax - Lmin) * remaining)
}

set up tibble with some priors

# Visualize an exponential(0.1) density to help calibrate the prior choices.
x <- seq(0,10,.01)
plot(x, dexp(x,.1), type = "l")

k only

# Prior predictive draws varying only k (Lmin, Lmax, delta held fixed);
# the commented-out expressions are the fuller priors tried elsewhere.
tibble(
  Lmin=7.5,#rnorm(50, 7.5, 1),
  Lmax=11.5,#rnorm(50, 11.5, 1),
  k=rexp(50,1),
  delta=1) %>% #rnorm(50,1,.5)) %>%
  mutate(delta=ifelse(delta < 0, 0, delta)) %>%  # no-op here (delta fixed at 1); guards the rnorm alternative
  
  mutate(prediction=pmap(list(Lmin, Lmax, k, delta), weibull, seq(0,4.5,.1) ),
         sample=row_number()) %>%
  unnest(prediction) %>%

  ggplot(aes(x=hour,y=y, group=sample)) +
  geom_line(alpha=.2)

delta only

# Prior predictive draws varying only delta (k fixed at 0.5).
tibble(
  Lmin=7.5,#rnorm(50, 7.5, 1),
  Lmax=11.5,#rnorm(50, 11.5, 1),
  k=.5,
  delta=rexp(50,.5)) %>%
  #mutate(delta=ifelse(delta < 0, 0, delta)) %>%
  
  mutate(prediction=pmap(list(Lmin, Lmax, k, delta), weibull, seq(0,4.5,.1) ),
         sample=row_number()) %>%
  unnest(prediction) %>%

  ggplot(aes(x=hour,y=y, group=sample)) +
  geom_line(alpha=.2)

delta and k

# Prior predictive draws varying both k and delta together.
tibble(
  Lmin=7.5,#rnorm(50, 7.5, 1),
  Lmax=11.5,#rnorm(50, 11.5, 1),
  k=rexp(100, 1),
  delta=rexp(100,.5)) %>%
  mutate(delta=ifelse(delta < 0, 0, delta)) %>%  # dead code: rexp() draws are already nonnegative
  
  mutate(prediction=pmap(list(Lmin, Lmax, k, delta), weibull, seq(0,4.5,.1) ),
         sample=row_number()) %>%
  unnest(prediction) %>%

  ggplot(aes(x=hour,y=y, group=sample)) +
  geom_line(alpha=.2)

all together

# Prior predictive check with all four parameters drawn from their priors.
tibble(
  Lmin  = rnorm(100, 7.5, .25),
  Lmax  = rnorm(100, 11.5, .25),
  k     = rexp(100, 1),
  delta = rexp(100, 0.5)
) %>%
  mutate(
    prediction = pmap(list(Lmin, Lmax, k, delta), weibull, seq(0, 4.5, .1)),
    sample = row_number()
  ) %>%
  unnest(prediction) %>%
  ggplot(aes(x = hour, y = y, group = sample)) +
  geom_line(alpha = .2)

models

priors

what priors are available to be set?

# List every prior brms would let us set for the fullest model considered
# (treatment effects plus per-floret random intercepts on delta and k).
get_prior(bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin ~ 1, 
    delta + k ~ trt + (1|id),
    nl=TRUE ),
  data=style)
                prior class        coef group resp dpar nlpar bound       source
 student_t(3, 0, 2.5) sigma                                              default
               (flat)     b                             delta            default
               (flat)     b   Intercept                 delta       (vectorized)
               (flat)     b     trtWest                 delta       (vectorized)
               (flat)     b trtWestHeat                 delta       (vectorized)
 student_t(3, 0, 2.5)    sd                             delta            default
 student_t(3, 0, 2.5)    sd                id           delta       (vectorized)
 student_t(3, 0, 2.5)    sd   Intercept    id           delta       (vectorized)
               (flat)     b                                 k            default
               (flat)     b   Intercept                     k       (vectorized)
               (flat)     b     trtWest                     k       (vectorized)
               (flat)     b trtWestHeat                     k       (vectorized)
 student_t(3, 0, 2.5)    sd                                 k            default
 student_t(3, 0, 2.5)    sd                id               k       (vectorized)
 student_t(3, 0, 2.5)    sd   Intercept    id               k       (vectorized)
               (flat)     b                              Lmax            default
               (flat)     b   Intercept                  Lmax       (vectorized)
               (flat)     b                              Lmin            default
               (flat)     b   Intercept                  Lmin       (vectorized)

minimal

# Priors for the minimal model; exponential priors with lb=0 keep k and
# delta positive, matching the prior-predictive simulations above.
prior1 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k", lb=0),
            prior(exponential(.5),nlpar="delta", lb=0) # delta = 1 reduces the Weibull to an exponential model
)

# Minimal Weibull growth model: a single set of parameters shared by all
# florets/treatments, with right-censoring for the filled end-of-series
# values; priors are sampled too so they can be inspected later.
fit1c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin + k + delta ~ 1, #minimal model, parameters do not vary for treatment or random effects
    nl=TRUE),
  data=style,
  prior=prior1,
  sample_prior = "yes",
  cores=4,
  chains=4)
Compiling Stan program...
Start sampling
starting worker pid=34485 on localhost:11685 at 15:39:20.689
starting worker pid=34499 on localhost:11685 at 15:39:20.983
starting worker pid=34513 on localhost:11685 at 15:39:21.255
starting worker pid=34527 on localhost:11685 at 15:39:21.518

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: 
Chain 1: Gradient evaluation took 0.001376 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 13.76 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 0.00087 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 8.7 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: Rejecting initial value:
Chain 3:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 3:   Stan can't start sampling from this initial value.
Chain 3: 
Chain 3: Gradient evaluation took 0.000872 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 8.72 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 0.000855 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 8.55 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 27.675 seconds (Warm-up)
Chain 2:                28.128 seconds (Sampling)
Chain 2:                55.803 seconds (Total)
Chain 2: 
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 29.426 seconds (Warm-up)
Chain 1:                29.167 seconds (Sampling)
Chain 1:                58.593 seconds (Total)
Chain 1: 
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 28.75 seconds (Warm-up)
Chain 3:                28.803 seconds (Sampling)
Chain 3:                57.553 seconds (Total)
Chain 3: 
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 27.737 seconds (Warm-up)
Chain 4:                30.294 seconds (Sampling)
Chain 4:                58.031 seconds (Total)
Chain 4: 
gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5383764 287.6    8292668 442.9         NA   8292668  442.9
Vcells 16841369 128.5   83330576 635.8      16384 203442360 1552.2
fit1c <- add_criterion(fit1c, "loo")  # store LOO for model comparison below
summary(fit1c)  # posterior summaries; check Rhat ~ 1 and adequate ESS
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta) 
         Lmax ~ 1
         Lmin ~ 1
         k ~ 1
         delta ~ 1
   Data: style (Number of observations: 1362) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Lmax_Intercept     11.34      0.23    10.91    11.80 1.00     1487     2019
Lmin_Intercept      7.40      0.05     7.29     7.51 1.00     2226     2007
k_Intercept         0.28      0.01     0.26     0.30 1.00     1596     2264
delta_Intercept     3.00      0.24     2.59     3.53 1.00     1646     1997

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     1.04      0.02     1.01     1.09 1.00     2793     2325

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5383518 287.6    8292668 442.9         NA   8292668  442.9
Vcells 16603765 126.7   66664461 508.7      16384 203442360 1552.2
plot(fit1c)

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5501005 293.8    8292668 442.9         NA   8292668  442.9
Vcells 17411913 132.9   53331569 406.9      16384 203442360 1552.2
pairs(fit1c)

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5676939 303.2    8292668 442.9         NA   8292668  442.9
Vcells 18319951 139.8   53331569 406.9      16384 203442360 1552.2
pred1c <- predict(fit1c)  # per-observation posterior predictive summary (Estimate, Est.Error, Q2.5, Q97.5)
gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5388768 287.8    8292668 442.9         NA   8292668  442.9
Vcells 16597600 126.7   61578767 469.9      16384 203442360 1552.2
pred1c <- cbind(pred1c, style)

# Observed smooths vs fit1c predictions: yellow line = posterior Estimate,
# grey ribbon = 95% predictive interval (Q2.5-Q97.5).
pred1c %>% ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_smooth(span=1) +
  geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate),color="yellow") 
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

pp_check(fit1c)
Using 10 posterior samples for ppc type 'dens_overlay' by default.
Warning: Censored responses are not shown in 'pp_check'.

fit2c:


# Priors for fit2c: treatment effects on delta get normal(0, 0.5) priors.
# No lb on the delta intercept prior on purpose — bounding it would also
# bound the treatment coefficients — at the cost of some rejected initial
# values during warmup.
prior2 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k", lb=0),
            prior(exponential(0.5),nlpar="delta"), # delta = 1 reduces the Weibull to an exponential model
            prior(normal(0,0.5), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.5), nlpar="delta", coef="trtWestHeat")
)

fit2c <- brm( # ignore the warning about lower bounds.  If we set it for the intercept, it impacts the other delta coefficients as well and we don't want that.
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin + k ~ 1, 
    delta ~ trt + (1|id),   # treatment effect and per-floret random intercept on delta
    nl=TRUE ),
  data=style,
  prior=prior2,
  sample_prior = "yes",
  cores=4,
  chains=4,
  iter=5000 #,
  #control=list(adapt_delta=0.9)
  )
Warning: It appears as if you have specified a lower bounded prior on a parameter that has no natural lower bound.
If this is really what you want, please specify argument 'lb' of 'set_prior' appropriately.
Warning occurred for prior 
b_delta ~ exponential(0.5)

Compiling Stan program...
Start sampling
starting worker pid=35268 on localhost:11685 at 16:11:17.561
starting worker pid=35282 on localhost:11685 at 16:11:17.823
starting worker pid=35296 on localhost:11685 at 16:11:18.135
starting worker pid=35310 on localhost:11685 at 16:11:18.474

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: Rejecting initial value:
Chain 1:   Error evaluating the log probability at the initial value.
Chain 1: Exception: exponential_lpdf: Random variable is -1.33308, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: 
Chain 1: Gradient evaluation took 0.002339 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 23.39 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 5000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: Rejecting initial value:
Chain 2:   Error evaluating the log probability at the initial value.
Chain 2: Exception: exponential_lpdf: Random variable is -0.020832, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 2: Rejecting initial value:
Chain 2:   Error evaluating the log probability at the initial value.
Chain 2: Exception: exponential_lpdf: Random variable is -0.763222, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 2: Rejecting initial value:
Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 2:   Stan can't start sampling from this initial value.
Chain 2: Rejecting initial value:
Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 2:   Stan can't start sampling from this initial value.
Chain 2: Rejecting initial value:
Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 2:   Stan can't start sampling from this initial value.
Chain 2: Rejecting initial value:
Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 2:   Stan can't start sampling from this initial value.
Chain 2: Rejecting initial value:
Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 2:   Stan can't start sampling from this initial value.
Chain 2: 
Chain 2: Gradient evaluation took 0.000967 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 9.67 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 5000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: Rejecting initial value:
Chain 3:   Error evaluating the log probability at the initial value.
Chain 3: Exception: exponential_lpdf: Random variable is -1.0895, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 3: Rejecting initial value:
Chain 3:   Error evaluating the log probability at the initial value.
Chain 3: Exception: exponential_lpdf: Random variable is -1.71041, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 3: Rejecting initial value:
Chain 3:   Error evaluating the log probability at the initial value.
Chain 3: Exception: exponential_lpdf: Random variable is -0.560105, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 3: Rejecting initial value:
Chain 3:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 3:   Stan can't start sampling from this initial value.
Chain 3: 
Chain 3: Gradient evaluation took 0.000778 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 7.78 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 5000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: Rejecting initial value:
Chain 4:   Error evaluating the log probability at the initial value.
Chain 4: Exception: exponential_lpdf: Random variable is -1.3763, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 4: Rejecting initial value:
Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 4:   Stan can't start sampling from this initial value.
Chain 4: Rejecting initial value:
Chain 4:   Error evaluating the log probability at the initial value.
Chain 4: Exception: exponential_lpdf: Random variable is -1.98786, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 4: Rejecting initial value:
Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 4:   Stan can't start sampling from this initial value.
Chain 4: Rejecting initial value:
Chain 4:   Error evaluating the log probability at the initial value.
Chain 4: Exception: exponential_lpdf: Random variable is -0.120339, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 4: Rejecting initial value:
Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 4:   Stan can't start sampling from this initial value.
Chain 4: Rejecting initial value:
Chain 4:   Error evaluating the log probability at the initial value.
Chain 4: Exception: exponential_lpdf: Random variable is -0.883203, but must be nonnegative! (in 'anon_model', line 78, column 2 to column 47)
Chain 4: Rejecting initial value:
Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 4:   Stan can't start sampling from this initial value.
Chain 4: 
Chain 4: Gradient evaluation took 0.000991 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 9.91 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 5000 [  0%]  (Warmup)
Chain 1: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 3: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 4: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 1: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 3: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 4: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 3: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 1: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 4: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 2: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 3: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 1: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 4: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 3: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 3: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 1: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 1: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 2: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 4: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 4: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 3: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 1: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 2: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 4: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 3: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 1: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 2: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 2: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 4: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 3: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 1: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 2: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 4: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 3: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 1: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 2: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 4: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 3: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 239.44 seconds (Warm-up)
Chain 3:                185.642 seconds (Sampling)
Chain 3:                425.082 seconds (Total)
Chain 3: 
Chain 1: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 248.287 seconds (Warm-up)
Chain 1:                182.865 seconds (Sampling)
Chain 1:                431.152 seconds (Total)
Chain 1: 
Chain 2: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 4: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 274.278 seconds (Warm-up)
Chain 4:                176.014 seconds (Sampling)
Chain 4:                450.292 seconds (Total)
Chain 4: 
Chain 2: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 2: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 328.205 seconds (Warm-up)
Chain 2:                146.374 seconds (Sampling)
Chain 2:                474.579 seconds (Total)
Chain 2: 
fit2c <- add_criterion(fit2c, "loo")

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5603569 299.3    8292668 442.9         NA   8292668  442.9
Vcells 17212243 131.4  124350110 948.8      16384 203442360 1552.2
summary(fit2c)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta) 
         Lmax ~ 1
         Lmin ~ 1
         k ~ 1
         delta ~ trt + (1 | id)
   Data: style (Number of observations: 1362) 
Samples: 4 chains, each with iter = 5000; warmup = 2500; thin = 1;
         total post-warmup samples = 10000

Group-Level Effects: 
~id (Number of levels: 72) 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(delta_Intercept)     1.40      0.16     1.13     1.75 1.00     1074     2637

Population-Level Effects: 
                  Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Lmax_Intercept       15.89      0.16    15.59    16.20 1.00     6236     6449
Lmin_Intercept        7.31      0.03     7.26     7.36 1.00     9392     7973
k_Intercept           0.18      0.00     0.18     0.18 1.00     5063     6317
delta_Intercept       2.62      0.26     2.14     3.14 1.01      539     1536
delta_trtWest         1.16      0.31     0.56     1.76 1.00      850     2356
delta_trtWestHeat    -0.03      0.31    -0.65     0.57 1.02      677     1259

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.55      0.01     0.53     0.58 1.00     8127     7283

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5603631 299.3    8292668 442.9         NA   8292668  442.9
Vcells 17212454 131.4   99480088 759.0      16384 203442360 1552.2
plot(fit2c, ask = FALSE)

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5338178 285.1    8292668 442.9         NA   8292668  442.9
Vcells 16153074 123.3   72548821 553.6      16384 171505768 1308.5
plot(conditional_effects(fit2c), ask=FALSE)

pp_check(fit2c)
Using 10 posterior samples for ppc type 'dens_overlay' by default.
Warning: Censored responses are not shown in 'pp_check'.

pred2c <- predict(fit2c)  # per-observation posterior predictive summary
# Observed (solid) vs predicted (dashed) loess smooths by treatment.
pred2c %>%
  cbind(style) %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_smooth(span=1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y=Estimate), lty=2, span=1) 
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Same observed-vs-predicted comparison, now faceted by day
# (free y scales so each day's range is visible).
pred2c %>%
  cbind(style) %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_smooth(span=1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y=Estimate), lty=2, span=1)  +
  facet_wrap(~day, scales = "free_y")
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Average observed length and predicted Estimate per day/treatment/hour;
# points = observed means, dashed lines = predicted means, one panel per day.
pred2c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate)) %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
`summarise()` has grouped output by 'day', 'trt'. You can override using the `.groups` argument.

loo_compare(fit1c, fit2c)
      elpd_diff se_diff
fit2c    0.0       0.0 
fit1c -812.7      27.7 

Fit2c is preferred, but its predictions are not that good relative to the observed data.

# Test whether the treatment effects on delta differ from zero and from each other.
hypothesis(fit2c, c("delta_trtWest  = 0",
                    "delta_trtWestHeat = 0",
                    "delta_trtWest = delta_trtWestHeat"))
Hypothesis Tests for class b:
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.

fit3c:

#inits3 <- list(b_Lmax=11.5, b_Lmin=7.5, b_delta=2, b_k=c(.1,0,0), z_1=array(0, dim=c(72,1362)))

# Initial-value generator for fit3c: draws sensible starting points from
# (roughly) the priors, to avoid the rejected-initial-value messages seen
# for fit1c/fit2c.
inits3 <- function() { list(
  b_Lmax=rnorm(1, 11.5, .25),
  b_Lmin=rnorm(1, 7.5, .25),
  b_delta=rexp(1, .5),
  b_k=c(rexp(1, 1), 0, 0),  # positive intercept; treatment offsets start at 0
  z_1=array(0, dim=c(72,1362))  # NOTE(review): dims look suspect — brms typically declares z_1 as (n_coef, n_levels), here (1, 72); confirm against stancode(fit3c)
)
}

#inits3 <- replicate(4, inits3, simplify = FALSE)

# Priors for fit3c's nonlinear parameters.
# NOTE(review): the exponential() priors trigger brms' "lower bounded prior"
# warning because b_k/b_delta coefficients have no natural lower bound;
# here they target the (positive) intercepts — confirm this is intended.
prior3 <- c(prior(normal(7.5,.25), nlpar="Lmin"),   # baseline length (mm)
            prior(normal(11.5, .25),nlpar="Lmax"),  # asymptotic length (mm)
            prior(exponential(1),nlpar="k"),        # positive rate intercept
            prior(normal(0,0.1), nlpar="k", coef="trtWest"),      # treatment offsets on k
            prior(normal(0,0.1), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5),nlpar="delta") 
)

# fit3c: nonlinear Weibull growth model for style length with censoring
# handled via cens(censored); Lmax, Lmin, delta are common, and the rate
# constant k gets a treatment effect plus a per-floret random intercept.
fit3c <- brm( # ignore the warning about k. 
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin + delta ~ 1, 
    k ~ trt + (1|id),   # treatment effect + per-floret random intercept on k
    nl=TRUE ),
  data=style,
  prior=prior3,
  sample_prior = "yes",   # prior draws enable hypothesis() Bayes factors
  inits = inits3,
  cores=4,
  chains=4,
  iter=5000,
  control=list(adapt_delta=0.99)   # raised to reduce divergent transitions
  )
Warning: It appears as if you have specified a lower bounded prior on a parameter that has no natural lower bound.
If this is really what you want, please specify argument 'lb' of 'set_prior' appropriately.
Warning occurred for prior 
b_k ~ exponential(1)
b_delta ~ exponential(0.5)

Compiling Stan program...
Start sampling
starting worker pid=39124 on localhost:11685 at 18:23:17.017
starting worker pid=39138 on localhost:11685 at 18:23:17.253
starting worker pid=39152 on localhost:11685 at 18:23:17.492
starting worker pid=39166 on localhost:11685 at 18:23:17.727

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 0.000706 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 7.06 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 5000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 0.012113 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 121.13 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 5000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 0.000771 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 7.71 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 5000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 0.000995 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 9.95 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 5000 [  0%]  (Warmup)
Chain 3: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 2: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 4: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 3: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 1: Iteration:  500 / 5000 [ 10%]  (Warmup)
Chain 2: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 4: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 2: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 3: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 1: Iteration: 1000 / 5000 [ 20%]  (Warmup)
Chain 4: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 2: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 1: Iteration: 1500 / 5000 [ 30%]  (Warmup)
Chain 3: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 4: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 2: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 2: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 1: Iteration: 2000 / 5000 [ 40%]  (Warmup)
Chain 4: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 4: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 3: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 3: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 2: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 3: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 4: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 1: Iteration: 2500 / 5000 [ 50%]  (Warmup)
Chain 1: Iteration: 2501 / 5000 [ 50%]  (Sampling)
Chain 3: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 2: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 4: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 1: Iteration: 3000 / 5000 [ 60%]  (Sampling)
Chain 3: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 3: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 1: Iteration: 3500 / 5000 [ 70%]  (Sampling)
Chain 4: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 2: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 3: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 391.743 seconds (Warm-up)
Chain 3:                195.878 seconds (Sampling)
Chain 3:                587.621 seconds (Total)
Chain 3: 
Chain 1: Iteration: 4000 / 5000 [ 80%]  (Sampling)
Chain 4: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 2: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 1: Iteration: 4500 / 5000 [ 90%]  (Sampling)
Chain 4: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 386.775 seconds (Warm-up)
Chain 4:                259.497 seconds (Sampling)
Chain 4:                646.272 seconds (Total)
Chain 4: 
Chain 1: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 467.077 seconds (Warm-up)
Chain 1:                203.198 seconds (Sampling)
Chain 1:                670.275 seconds (Total)
Chain 1: 
Chain 2: Iteration: 5000 / 5000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 363.982 seconds (Warm-up)
Chain 2:                305.675 seconds (Sampling)
Chain 2:                669.657 seconds (Total)
Chain 2: 
Warning: There were 13 divergent transitions after warmup. See
https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
to find out why this is a problem and how to eliminate them.
Warning: Examine the pairs() plot to diagnose sampling problems

Warning: Bulk Effective Samples Size (ESS) is too low, indicating posterior means and medians may be unreliable.
Running the chains for more iterations may help. See
https://mc-stan.org/misc/warnings.html#bulk-ess
# Cache the LOO criterion on the fit object so later loo_compare() calls
# do not recompute it.
fit3c <- add_criterion(fit3c, "loo")

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5801529 309.9    9991201 533.6         NA   9991201  533.6
Vcells 19434076 148.3  125390965 956.7      16384 203442360 1552.2
summary(fit3c)
Warning: There were 13 divergent transitions after warmup. Increasing adapt_delta above 0.99 may help. See http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta) 
         Lmax ~ 1
         Lmin ~ 1
         delta ~ 1
         k ~ trt + (1 | id)
   Data: style (Number of observations: 1362) 
Samples: 4 chains, each with iter = 5000; warmup = 2500; thin = 1;
         total post-warmup samples = 10000

Group-Level Effects: 
~id (Number of levels: 72) 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(k_Intercept)     0.07      0.01     0.06     0.08 1.01      458     1114

Population-Level Effects: 
                Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Lmax_Intercept     13.23      0.12    13.01    13.47 1.00     3324     4655
Lmin_Intercept      7.29      0.03     7.24     7.34 1.00     2989     4500
delta_Intercept     2.51      0.07     2.37     2.65 1.00     2609     4038
k_Intercept         0.26      0.01     0.23     0.29 1.01      270      471
k_trtWest          -0.08      0.02    -0.12    -0.04 1.01      298      591
k_trtWestHeat      -0.03      0.02    -0.07     0.01 1.00      262      583

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.50      0.01     0.48     0.52 1.00     3692     5473

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5801593 309.9    9991201 533.6         NA   9991201  533.6
Vcells 19434303 148.3  100312772 765.4      16384 203442360 1552.2
pairs(fit3c, pars="^b_")

plot(fit3c, ask = FALSE)

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  5989463 319.9    9991201 533.6         NA   9991201  533.6
Vcells 21923939 167.3   64200175 489.9      16384 203442360 1552.2
plot(conditional_effects(fit3c), ask=FALSE)

pp_check(fit3c)
Using 10 posterior samples for ppc type 'dens_overlay' by default.
Warning: Censored responses are not shown in 'pp_check'.

# Posterior predictions for fit3c, pooled over days:
# solid loess = observed length2, dashed loess = posterior Estimate.
pred3c <- predict(fit3c)
cbind(pred3c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = 2, span = 1)
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Same observed-vs-predicted comparison for fit3c, one panel per day.
cbind(pred3c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = 2, span = 1) +
  facet_wrap(vars(day), scales = "free_y")
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Mean observed vs mean predicted trajectory per day/treatment/hour for fit3c.
# .groups = "drop" ungroups the summary and silences the regrouping message.
pred3c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
`summarise()` has grouped output by 'day', 'trt'. You can override using the `.groups` argument.

# LOO comparison of the three models so far; the top row (elpd_diff = 0)
# is the best-scoring model.
loo_compare(fit1c, fit2c, fit3c)
      elpd_diff se_diff
fit3c    0.0       0.0 
fit2c -153.9      28.1 
fit1c -966.7      41.4 

fit_4c: k and delta ~ trt.

inits4 <- function() {
  # Starting values for fit_4c (delta and k both vary by treatment).
  # RNG calls are made in the same order as before; the two standardized
  # group-level effect arrays (72 ids x 1 coefficient each) start at zero.
  lmax_start  <- rnorm(1, 11.5, .25)
  lmin_start  <- rnorm(1, 7.5, .25)
  delta_start <- c(rexp(1, .5), 0, 0)  # intercept > 0, treatment offsets at 0
  k_start     <- c(rexp(1, 1), 0, 0)
  list(
    b_Lmax  = lmax_start,
    b_Lmin  = lmin_start,
    b_delta = delta_start,
    b_k     = k_start,
    z_1     = array(0, dim = c(72, 1)),
    z_2     = array(0, dim = c(72, 1))
  )
}

# Priors for fit_4c (delta and k both vary by treatment).
# k's treatment-offset priors are tightened to sd 0.02 (vs 0.1 in prior3);
# delta's offsets get sd 0.25.
# NOTE(review): the exponential() priors again trigger brms' lower-bound
# warning; they are aimed at the positive intercepts — confirm intended.
prior4 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.02), nlpar="k", coef="trtWest"),
            prior(normal(0,0.02), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5), nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.25), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.25), nlpar="delta", coef="trtWestHeat")
)

# fit_4c: extends fit3c by letting the Weibull shape (delta) as well as the
# rate (k) vary by treatment, each with a per-floret random intercept.
fit_4c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin ~ 1, 
    delta + k ~ trt + (1|id),   # treatment effects + per-floret intercepts
    nl=TRUE ),
  data=style,
  prior=prior4,
  inits=inits4,
  cores=4,
  chains=4,
  iter=10000, control = list(adapt_delta=0.95)
) 
Warning: It appears as if you have specified a lower bounded prior on a parameter that has no natural lower bound.
If this is really what you want, please specify argument 'lb' of 'set_prior' appropriately.
Warning occurred for prior 
b_k ~ exponential(1)
b_delta ~ exponential(0.5)

Compiling Stan program...
Start sampling
starting worker pid=43931 on localhost:11685 at 22:42:07.548
starting worker pid=43945 on localhost:11685 at 22:42:07.787
starting worker pid=43959 on localhost:11685 at 22:42:08.018
starting worker pid=43973 on localhost:11685 at 22:42:08.250

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: Rejecting initial value:
Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 1:   Stan can't start sampling from this initial value.
Chain 1: 
Chain 1: Gradient evaluation took 0.001149 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 11.49 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 10000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 0.001544 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 15.44 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 10000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 0.001653 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 16.53 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 10000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: Rejecting initial value:
Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 4:   Stan can't start sampling from this initial value.
Chain 4: 
Chain 4: Gradient evaluation took 0.001191 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 11.91 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 10000 [  0%]  (Warmup)
Chain 4: Iteration: 1000 / 10000 [ 10%]  (Warmup)
Chain 3: Iteration: 1000 / 10000 [ 10%]  (Warmup)
Chain 4: Iteration: 2000 / 10000 [ 20%]  (Warmup)
Chain 3: Iteration: 2000 / 10000 [ 20%]  (Warmup)
Chain 2: Iteration: 1000 / 10000 [ 10%]  (Warmup)
Chain 4: Iteration: 3000 / 10000 [ 30%]  (Warmup)
Chain 2: Iteration: 2000 / 10000 [ 20%]  (Warmup)
Chain 3: Iteration: 3000 / 10000 [ 30%]  (Warmup)
Chain 4: Iteration: 4000 / 10000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 10000 [ 10%]  (Warmup)
Chain 2: Iteration: 3000 / 10000 [ 30%]  (Warmup)
Chain 3: Iteration: 4000 / 10000 [ 40%]  (Warmup)
Chain 4: Iteration: 5000 / 10000 [ 50%]  (Warmup)
Chain 4: Iteration: 5001 / 10000 [ 50%]  (Sampling)
Chain 2: Iteration: 4000 / 10000 [ 40%]  (Warmup)
Chain 3: Iteration: 5000 / 10000 [ 50%]  (Warmup)
Chain 3: Iteration: 5001 / 10000 [ 50%]  (Sampling)
Chain 4: Iteration: 6000 / 10000 [ 60%]  (Sampling)
Chain 2: Iteration: 5000 / 10000 [ 50%]  (Warmup)
Chain 2: Iteration: 5001 / 10000 [ 50%]  (Sampling)
Chain 3: Iteration: 6000 / 10000 [ 60%]  (Sampling)
Chain 4: Iteration: 7000 / 10000 [ 70%]  (Sampling)
Chain 2: Iteration: 6000 / 10000 [ 60%]  (Sampling)
Chain 3: Iteration: 7000 / 10000 [ 70%]  (Sampling)
Chain 4: Iteration: 8000 / 10000 [ 80%]  (Sampling)
Chain 2: Iteration: 7000 / 10000 [ 70%]  (Sampling)
Chain 3: Iteration: 8000 / 10000 [ 80%]  (Sampling)
Chain 4: Iteration: 9000 / 10000 [ 90%]  (Sampling)
Chain 2: Iteration: 8000 / 10000 [ 80%]  (Sampling)
Chain 3: Iteration: 9000 / 10000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 10000 [ 20%]  (Warmup)
Chain 4: Iteration: 10000 / 10000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 718.1 seconds (Warm-up)
Chain 4:                849.172 seconds (Sampling)
Chain 4:                1567.27 seconds (Total)
Chain 4: 
Chain 2: Iteration: 9000 / 10000 [ 90%]  (Sampling)
Chain 3: Iteration: 10000 / 10000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 817.137 seconds (Warm-up)
Chain 3:                798.559 seconds (Sampling)
Chain 3:                1615.7 seconds (Total)
Chain 3: 
Chain 1: Iteration: 3000 / 10000 [ 30%]  (Warmup)
Chain 2: Iteration: 10000 / 10000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 963.456 seconds (Warm-up)
Chain 2:                726.01 seconds (Sampling)
Chain 2:                1689.47 seconds (Total)
Chain 2: 
Chain 1: Iteration: 4000 / 10000 [ 40%]  (Warmup)
Chain 1: Iteration: 5000 / 10000 [ 50%]  (Warmup)
Chain 1: Iteration: 5001 / 10000 [ 50%]  (Sampling)
Chain 1: Iteration: 6000 / 10000 [ 60%]  (Sampling)
Chain 1: Iteration: 7000 / 10000 [ 70%]  (Sampling)
Chain 1: Iteration: 8000 / 10000 [ 80%]  (Sampling)
Chain 1: Iteration: 9000 / 10000 [ 90%]  (Sampling)
Chain 1: Iteration: 10000 / 10000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 1751.66 seconds (Warm-up)
Chain 1:                275.371 seconds (Sampling)
Chain 1:                2027.03 seconds (Total)
Chain 1: 
Warning: There were 13 divergent transitions after warmup. See
https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
to find out why this is a problem and how to eliminate them.
Warning: Examine the pairs() plot to diagnose sampling problems
# Cache the LOO criterion on fit_4c for later loo_compare() calls.
fit_4c <- add_criterion(fit_4c, "loo")

gc()
           used  (Mb) gc trigger   (Mb) limit (Mb)  max used   (Mb)
Ncells  6038215 322.5    9991201  533.6         NA   9991201  533.6
Vcells 27018499 206.2  239593280 1828.0      16384 352107441 2686.4
summary(fit_4c)
Warning: There were 13 divergent transitions after warmup. Increasing adapt_delta above 0.95 may help. See http://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta) 
         Lmax ~ 1
         Lmin ~ 1
         delta ~ trt + (1 | id)
         k ~ trt + (1 | id)
   Data: style (Number of observations: 1362) 
Samples: 4 chains, each with iter = 10000; warmup = 5000; thin = 1;
         total post-warmup samples = 20000

Group-Level Effects: 
~id (Number of levels: 72) 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(delta_Intercept)     0.44      0.08     0.30     0.60 1.00    10160    14357
sd(k_Intercept)         0.07      0.01     0.06     0.08 1.00     6734    10868

Population-Level Effects: 
                  Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Lmax_Intercept       13.37      0.12    13.14    13.61 1.00    29252    15954
Lmin_Intercept        7.30      0.03     7.25     7.35 1.00    33915    15137
delta_Intercept       2.53      0.12     2.31     2.77 1.00    20251    15535
delta_trtWest         0.22      0.15    -0.07     0.53 1.00    26303    16784
delta_trtWestHeat     0.07      0.14    -0.20     0.34 1.00    22649    16015
k_Intercept           0.24      0.01     0.21     0.26 1.00     4716     8526
k_trtWest            -0.04      0.01    -0.06    -0.01 1.00     6706    10191
k_trtWestHeat        -0.00      0.01    -0.03     0.02 1.00     6205     9494

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.48      0.01     0.47     0.50 1.00    25540    15145

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
gc()
           used  (Mb) gc trigger   (Mb) limit (Mb)  max used   (Mb)
Ncells  6038280 322.5    9991201  533.6         NA   9991201  533.6
Vcells 27018753 206.2  191674624 1462.4      16384 352107441 2686.4
pairs(fit_4c, pars = "^b_")
plot(fit_4c, ask = FALSE)

gc()
           used  (Mb) gc trigger   (Mb) limit (Mb)  max used   (Mb)
Ncells  6193507 330.8    9991201  533.6         NA   9991201  533.6
Vcells 31694194 241.9  153339700 1169.9      16384 352107441 2686.4
plot(conditional_effects(fit_4c), ask=FALSE)

pp_check(fit_4c)
Using 10 posterior samples for ppc type 'dens_overlay' by default.
Warning: Censored responses are not shown in 'pp_check'.

# Posterior predictions for fit_4c, pooled over days:
# solid loess = observed length2, dashed loess = posterior Estimate.
pred_4c <- predict(fit_4c)
cbind(pred_4c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = 2, span = 1)
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Same observed-vs-predicted comparison for fit_4c, one panel per day.
cbind(pred_4c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = 2, span = 1) +
  facet_wrap(vars(day), scales = "free_y")
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Mean observed vs mean predicted trajectory per treatment/hour (days pooled)
# for fit_4c. .groups = "drop" ungroups the summary and silences the message.
pred_4c %>%
  cbind(style) %>%
  group_by(trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_line() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) 
`summarise()` has grouped output by 'trt'. You can override using the `.groups` argument.

# Mean observed vs mean predicted trajectory per day/treatment/hour for fit_4c.
# .groups = "drop" ungroups the summary and silences the regrouping message.
pred_4c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
`summarise()` has grouped output by 'day', 'trt'. You can override using the `.groups` argument.

loo_compare(fit1c, fit2c, fit3c, fit_4c)
       elpd_diff se_diff
fit_4c    0.0       0.0 
fit3c   -22.8       6.2 
fit2c  -176.7      27.8 
fit1c  -989.5      42.2 

fit_5c: delta + k ~ trt + (1|id); Lmax ~ (1|day)

inits5 <- function() {
  # Starting values for fit_5c: Lmax gets a day-level random intercept
  # (8 day levels -> z_1), delta and k get floret-level intercepts
  # (72 id levels -> z_2, z_3). RNG call order matches the original.
  lmax_start  <- rnorm(1, 11.5, .25)
  lmin_start  <- rnorm(1, 7.5, .25)
  delta_start <- c(rexp(1, .5), 0, 0)
  k_start     <- c(rexp(1, 1), 0, 0)
  list(
    b_Lmax  = lmax_start,
    b_Lmin  = lmin_start,
    b_delta = delta_start,
    b_k     = k_start,
    z_1     = array(0, dim = c(8, 1)),   # day-level (Lmax)
    z_2     = array(0, dim = c(72, 1)),  # id-level
    z_3     = array(0, dim = c(72, 1))   # id-level
  )
}

# Priors for fit_5c — identical to prior4; the day-level sd on Lmax is
# left at brms' default.
# NOTE(review): exponential() priors again trigger brms' lower-bound
# warning; they target the positive intercepts — confirm intended.
prior5 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.02), nlpar="k", coef="trtWest"),
            prior(normal(0,0.02), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5), nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.25), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.25), nlpar="delta", coef="trtWestHeat")
)

# fit_5c: adds a day-level random intercept on Lmax to fit_4c's structure,
# letting the asymptotic style length vary between observation days.
fit_5c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmin ~ 1, 
    Lmax ~ (1|day),   # asymptote varies by day
    delta + k ~ trt + (1|id),   # treatment effects + per-floret intercepts
    nl=TRUE ),
  data=style,
  prior=prior5,
  sample_prior = "yes",
  inits=inits5,
  cores=4,
  chains=4,
  iter=2000
) 
Warning: It appears as if you have specified a lower bounded prior on a parameter that has no natural lower bound.
If this is really what you want, please specify argument 'lb' of 'set_prior' appropriately.
Warning occurred for prior 
b_k ~ exponential(1)
b_delta ~ exponential(0.5)

Compiling Stan program...
Start sampling
starting worker pid=72742 on localhost:11685 at 10:44:09.560
starting worker pid=72756 on localhost:11685 at 10:44:09.797
starting worker pid=72770 on localhost:11685 at 10:44:10.018
starting worker pid=72784 on localhost:11685 at 10:44:10.242

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 0.001723 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 17.23 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: Rejecting initial value:
Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 2:   Stan can't start sampling from this initial value.
Chain 2: 
Chain 2: Gradient evaluation took 0.001255 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 12.55 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: Rejecting initial value:
Chain 3:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 3:   Stan can't start sampling from this initial value.
Chain 3: 
Chain 3: Gradient evaluation took 0.003629 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 36.29 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: 
Chain 4: Gradient evaluation took 0.003214 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 32.14 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 223.221 seconds (Warm-up)
Chain 1:                91.087 seconds (Sampling)
Chain 1:                314.308 seconds (Total)
Chain 1: 
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 224.296 seconds (Warm-up)
Chain 3:                94.823 seconds (Sampling)
Chain 3:                319.119 seconds (Total)
Chain 3: 
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 218.1 seconds (Warm-up)
Chain 4:                98.073 seconds (Sampling)
Chain 4:                316.173 seconds (Total)
Chain 4: 
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 678.57 seconds (Warm-up)
Chain 2:                145.349 seconds (Sampling)
Chain 2:                823.919 seconds (Total)
Chain 2: 
# Cache the LOO criterion on fit_5c for later loo_compare() calls.
fit_5c <- add_criterion(fit_5c, "loo")

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  6507990 347.6    9991201 533.6         NA   9991201  533.6
Vcells 32971270 251.6  130397925 994.9      16384 428446769 3268.8
summary(fit_5c)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta) 
         Lmin ~ 1
         Lmax ~ (1 | day)
         delta ~ trt + (1 | id)
         k ~ trt + (1 | id)
   Data: style (Number of observations: 1362) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~day (Number of levels: 8) 
                   Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Lmax_Intercept)     2.03      0.56     1.24     3.39 1.01      604      820

~id (Number of levels: 72) 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(delta_Intercept)     1.85      0.28     1.37     2.49 1.00      951     1763
sd(k_Intercept)         0.03      0.00     0.02     0.03 1.00     1032     1923

Population-Level Effects: 
                  Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Lmin_Intercept        7.36      0.02     7.32     7.40 1.00     2565     2544
Lmax_Intercept       11.47      0.23    11.02    11.94 1.00     1305     2127
delta_Intercept       4.35      0.36     3.70     5.10 1.00      885     1889
delta_trtWest        -0.00      0.23    -0.45     0.46 1.00     1892     2752
delta_trtWestHeat     0.06      0.22    -0.39     0.50 1.00     2013     2535
k_Intercept           0.32      0.01     0.31     0.34 1.00      998     1918
k_trtWest            -0.09      0.01    -0.10    -0.07 1.00      868     1515
k_trtWestHeat        -0.02      0.01    -0.04    -0.01 1.00      922     1379

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.45      0.01     0.43     0.47 1.00     3597     2868

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  6507679 347.6    9991201 533.6         NA   9991201  533.6
Vcells 32666777 249.3  104318340 795.9      16384 428446769 3268.8
plot(fit_5c, ask = FALSE)

gc()
           used  (Mb) gc trigger  (Mb) limit (Mb)  max used   (Mb)
Ncells  6693858 357.5    9991201 533.6         NA   9991201  533.6
Vcells 33875577 258.5  104318340 795.9      16384 428446769 3268.8
plot(conditional_effects(fit_5c), ask=FALSE)

pp_check(fit_5c)
Using 10 posterior samples for ppc type 'dens_overlay' by default.
Warning: Censored responses are not shown in 'pp_check'.

# Posterior predictions for fit_5c, pooled over days:
# solid loess = observed length2, dashed loess = posterior Estimate.
pred_5c <- predict(fit_5c)
cbind(pred_5c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = 2, span = 1)
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Same observed-vs-predicted comparison for fit_5c, one panel per day
# (shared y scale here, unlike the earlier free_y panels).
cbind(pred_5c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = 2, span = 1) +
  facet_wrap(vars(day))
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

# Mean observed vs mean predicted trajectory per treatment/hour (days pooled)
# for fit_5c. .groups = "drop" ungroups the summary and silences the message.
pred_5c %>%
  cbind(style) %>%
  group_by(trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_line() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) 
`summarise()` has grouped output by 'trt'. You can override using the `.groups` argument.

# Mean observed vs mean predicted trajectory per day/treatment/hour for fit_5c.
# .groups = "drop" ungroups the summary and silences the regrouping message.
pred_5c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
`summarise()` has grouped output by 'day', 'trt'. You can override using the `.groups` argument.

# LOO comparison of all five models fitted so far; top row is the best.
loo_compare(fit1c, fit2c, fit3c, fit_4c, fit_5c)
       elpd_diff se_diff
fit_5c     0.0       0.0
fit_4c  -108.8      10.8
fit3c   -131.5      13.1
fit2c   -285.4      30.6
fit1c  -1098.2      43.9

fit_6c:


inits6 <- function() {
  # Starting values for fit_6c: both Lmax and Lmin get day-level random
  # intercepts (8 levels -> z_1, z_2); delta and k get floret-level
  # intercepts (72 levels -> z_3, z_4). RNG call order matches the original.
  lmax_start  <- rnorm(1, 11.5, .25)
  lmin_start  <- rnorm(1, 7.5, .25)
  delta_start <- c(rexp(1, .5), 0, 0)
  k_start     <- c(rexp(1, 1), 0, 0)
  list(
    b_Lmax  = lmax_start,
    b_Lmin  = lmin_start,
    b_delta = delta_start,
    b_k     = k_start,
    z_1     = array(0, dim = c(8, 1)),   # day-level (Lmax)
    z_2     = array(0, dim = c(8, 1)),   # day-level (Lmin)
    z_3     = array(0, dim = c(72, 1)),  # id-level
    z_4     = array(0, dim = c(72, 1))   # id-level
  )
}

prior6 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.04), nlpar="k", coef="trtWest"),
            prior(normal(0,0.04), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5), nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.25), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.25), nlpar="delta", coef="trtWestHeat")
)

fit_6c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin ~ (1|day),
    delta + k ~ trt + (1|id),   # 
    nl=TRUE ),
  data=style,
  prior=prior6,
  sample_prior = "yes",
  inits=inits6,
  cores=4,
  chains=4,
  iter=2000)
Warning: It appears as if you have specified a lower bounded prior on a parameter that has no natural lower bound.
If this is really what you want, please specify argument 'lb' of 'set_prior' appropriately.
Warning occurred for prior 
b_k ~ exponential(1)
b_delta ~ exponential(0.5)

Compiling Stan program...
Start sampling
starting worker pid=71112 on localhost:11685 at 09:46:45.970
starting worker pid=71127 on localhost:11685 at 09:46:46.356
starting worker pid=71141 on localhost:11685 at 09:46:46.651
starting worker pid=71155 on localhost:11685 at 09:46:46.913

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 1).
Chain 1: 
Chain 1: Gradient evaluation took 0.00154 seconds
Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 15.4 seconds.
Chain 1: Adjust your expectations accordingly!
Chain 1: 
Chain 1: 
Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 2).
Chain 2: 
Chain 2: Gradient evaluation took 0.001593 seconds
Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 15.93 seconds.
Chain 2: Adjust your expectations accordingly!
Chain 2: 
Chain 2: 
Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 3).
Chain 3: 
Chain 3: Gradient evaluation took 0.001645 seconds
Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 16.45 seconds.
Chain 3: Adjust your expectations accordingly!
Chain 3: 
Chain 3: 
Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)

SAMPLING FOR MODEL 'anon_model' NOW (CHAIN 4).
Chain 4: Rejecting initial value:
Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
Chain 4:   Stan can't start sampling from this initial value.
Chain 4: 
Chain 4: Gradient evaluation took 0.003294 seconds
Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 32.94 seconds.
Chain 4: Adjust your expectations accordingly!
Chain 4: 
Chain 4: 
Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 1: 
Chain 1:  Elapsed Time: 215.768 seconds (Warm-up)
Chain 1:                135.726 seconds (Sampling)
Chain 1:                351.494 seconds (Total)
Chain 1: 
Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 2: 
Chain 2:  Elapsed Time: 267.888 seconds (Warm-up)
Chain 2:                97.123 seconds (Sampling)
Chain 2:                365.011 seconds (Total)
Chain 2: 
Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 3: 
Chain 3:  Elapsed Time: 270.062 seconds (Warm-up)
Chain 3:                102.446 seconds (Sampling)
Chain 3:                372.508 seconds (Total)
Chain 3: 
Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
Chain 4: 
Chain 4:  Elapsed Time: 265.939 seconds (Warm-up)
Chain 4:                125.089 seconds (Sampling)
Chain 4:                391.028 seconds (Total)
Chain 4: 
) 
Error: unexpected ')' in ")"
summary(fit_6c)
 Family: gaussian 
  Links: mu = identity; sigma = identity 
Formula: length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta) 
         Lmax ~ (1 | day)
         Lmin ~ (1 | day)
         delta ~ trt + (1 | id)
         k ~ trt + (1 | id)
   Data: style (Number of observations: 1362) 
Samples: 4 chains, each with iter = 2000; warmup = 1000; thin = 1;
         total post-warmup samples = 4000

Group-Level Effects: 
~day (Number of levels: 8) 
                   Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(Lmax_Intercept)     1.73      0.46     1.08     2.83 1.00      784     1330
sd(Lmin_Intercept)     0.51      0.16     0.30     0.90 1.00     1183     1699

~id (Number of levels: 72) 
                    Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sd(delta_Intercept)     1.22      0.22     0.83     1.69 1.00      925     1704
sd(k_Intercept)         0.03      0.00     0.02     0.03 1.00     1146     2011

Population-Level Effects: 
                  Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
Lmax_Intercept       11.44      0.23    10.97    11.89 1.00     1255     1750
Lmin_Intercept        7.47      0.15     7.19     7.77 1.00      853     1393
delta_Intercept       4.53      0.28     4.01     5.12 1.00     1021     1731
delta_trtWest        -0.08      0.21    -0.50     0.33 1.00     2637     2562
delta_trtWestHeat     0.10      0.21    -0.31     0.51 1.00     2314     3031
k_Intercept           0.34      0.01     0.33     0.35 1.00      849     1459
k_trtWest            -0.10      0.01    -0.12    -0.08 1.00      763     1477
k_trtWestHeat        -0.03      0.01    -0.05    -0.01 1.00      785     1277

Family Specific Parameters: 
      Estimate Est.Error l-95% CI u-95% CI Rhat Bulk_ESS Tail_ESS
sigma     0.39      0.01     0.38     0.41 1.00     3236     3077

Samples were drawn using sampling(NUTS). For each parameter, Bulk_ESS
and Tail_ESS are effective sample size measures, and Rhat is the potential
scale reduction factor on split chains (at convergence, Rhat = 1).
gc()
           used  (Mb) gc trigger   (Mb) limit (Mb)  max used   (Mb)
Ncells  6498485 347.1    9991201  533.6         NA   9991201  533.6
Vcells 37740970 288.0  318354307 2428.9      16384 428446769 3268.8
plot(fit_6c, ask = FALSE)

gc()
           used  (Mb) gc trigger   (Mb) limit (Mb)  max used   (Mb)
Ncells  6703482 358.1    9991201  533.6         NA   9991201  533.6
Vcells 39084255 298.2  203746757 1554.5      16384 428446769 3268.8
plot(conditional_effects(fit_6c), ask=FALSE)

pp_check(fit_6c)
Using 10 posterior samples for ppc type 'dens_overlay' by default.
Warning: Censored responses are not shown in 'pp_check'.

pred_6c <- predict(fit_6c) 
pred_6c %>%
  cbind(style) %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_smooth(span=1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y=Estimate), lty=2, span=1) 
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

pred_6c %>%
  cbind(style) %>%
  ggplot(aes(x=hour, y=length, color=trt)) +
  geom_smooth(span=1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y=Estimate), lty=2, span=1)  +
  facet_wrap(~day, scales = "free_y")
`geom_smooth()` using method = 'loess' and formula 'y ~ x'
Warning: Removed 39 rows containing non-finite values (stat_smooth).
`geom_smooth()` using method = 'loess' and formula 'y ~ x'

ggsave("../output/style_fit_plot.pdf", width=6.5, height = 4)
Warning: Removed 13 rows containing missing values (geom_point).
save.image("../output/StyleModelFits.Rdata")

compare models

loo_compare(fit1c, fit2c, fit3c, fit_4c, fit_5c, fit_6c)
       elpd_diff se_diff
fit_6c     0.0       0.0
fit_5c  -157.6      20.1
fit_4c  -266.4      22.7
fit3c   -289.1      23.6
fit2c   -443.0      33.6
fit1c  -1255.8      43.9

posterior of best model

Get the posterior

post_6c <- posterior_samples(fit_6c, pars="b.*")

posterior for inflection point

# inflection point equation
infl.pt <- function(delta, k) {
  (1/k) *
    ((delta -1) / delta)^(1/delta)
}

post_6c$infl_intercept <- infl.pt(post_6c$b_delta_Intercept, post_6c$b_k_Intercept)

post_6c$infl_trtWest <-
  infl.pt(
    post_6c$b_delta_Intercept + post_6c$b_delta_trtWest,
    post_6c$b_k_Intercept + post_6c$b_k_trtWest)

post_6c$infl_trtWestHeat <-
  infl.pt(
    post_6c$b_delta_Intercept+post_6c$b_delta_trtWestHeat,
    post_6c$b_k_Intercept + post_6c$b_k_trtWestHeat)
apply(post_6c, 2, function(x) mean(x))
         b_Lmax_Intercept          b_Lmin_Intercept         b_delta_Intercept           b_delta_trtWest 
             1.143693e+01              7.474550e+00              4.534387e+00             -8.308510e-02 
      b_delta_trtWestHeat             b_k_Intercept               b_k_trtWest           b_k_trtWestHeat 
             9.522676e-02              3.400632e-01             -1.007582e-01             -3.123653e-02 
             prior_b_Lmax              prior_b_Lmin   prior_b_delta_Intercept     prior_b_delta_trtWest 
             1.149030e+01              7.500043e+00              1.967551e+00             -2.220422e-03 
prior_b_delta_trtWestHeat       prior_b_k_Intercept         prior_b_k_trtWest     prior_b_k_trtWestHeat 
             1.410824e-03              1.022834e+00             -6.267506e-05             -5.693913e-04 
           infl_intercept              infl_trtWest          infl_trtWestHeat 
             2.782406e+00              3.945179e+00              3.071197e+00 

posterior probability that inflection point for trtWestHeat is greater than inflection point for East

sum( (post_6c$infl_trtWestHeat > post_6c$infl_intercept )) / nrow(post_6c)
[1] 0.99975

posterior probability that inflection point for trtWest is greater than inflection point for East

sum( (post_6c$infl_trtWest > post_6c$infl_intercept )) / nrow(post_6c)
[1] 1

posterior probability that inflection point for trtWest is greater than inflection point for trtWestHeat

sum((post_6c$infl_trtWest > post_6c$infl_trtWestHeat)) / nrow(post_6c)
[1] 1

posterior probability that k for trtWestHeat is less than East

hypothesis(fit_6c, "k_trtWestHeat < 0")
Hypothesis Tests for class b:
---
'CI': 90%-CI for one-sided and 95%-CI for two-sided hypotheses.
'*': For one-sided hypotheses, the posterior probability exceeds 95%;
for two-sided hypotheses, the value tested against lies outside the 95%-CI.
Posterior probabilities of point hypotheses assume equal prior probabilities.

posterior probability that k for trtWest is less than East

sum( (post_6c$b_k_trtWest < 0 )) / nrow(post_6c)
[1] 1
sum( (post_6c$b_k_trtWest + post_6c$b_k_Intercept < post_6c$b_k_Intercept   )) / nrow(post_6c)
[1] 1

Plots

k

post_6c %>%
  as.data.frame() %>%
  select(starts_with("b_k")) %>%
  rename_with(~str_remove(.,"b_")) %>%
  rename_with(~str_remove(.,"trt")) %>%
  rename(k_East=k_Intercept) %>%
  mutate(k_West=k_West+k_East,
         k_WestHeat=k_WestHeat+k_East) %>%
  summarize(across(.fns = list(mean = mean,
                               lower = ~ rethinking::HPDI(., 0.95)[1],
                               upper = ~ rethinking::HPDI(., 0.95)[2])
                   )) %>%
  pivot_longer(cols=everything(), names_to=c("trt", "parameter"), names_pattern = "k_(.*)_(.*)") %>%
  pivot_wider(names_from = "parameter", values_from = "value") %>%
  ggplot(aes(x=trt, y=mean, ymin=lower, ymax=upper)) +
  geom_col(fill="skyblue") + 
  geom_errorbar(width=.5) +
  ylab("k") +
  xlab("Condition") +
  theme_bw()

ggsave("../output/style_k_plot.pdf", height=3, width = 3)

inflection

post_6c %>%
  as.data.frame() %>%
  select(starts_with("infl")) %>%
  rename_with(~str_remove(.,"trt")) %>%
  rename_with(~str_remove(.,"infl_")) %>%
  rename(East=intercept) %>%
  summarize(across(.fns = list(mean = mean,
                               lower = ~ rethinking::HPDI(., 0.95)[1],
                               upper = ~ rethinking::HPDI(., 0.95)[2])
                   )) %>%
  pivot_longer(cols=everything(), names_to=c("trt", "parameter"), names_pattern = "(.*)_(.*)") %>%
  pivot_wider(names_from = "parameter", values_from = "value") %>%
  ggplot(aes(x=trt, y=mean, ymin=lower, ymax=upper)) +
  geom_col(fill="red") + 
  geom_errorbar(width=.5) +
  ylab("Inflection Point") +
  xlab("Condition") +
  theme_bw()

ggsave("../output/style_inflection_plot.pdf", height=3, width = 3)

posterior probability that k for trtWest is less than k for trtWestHeat

sum( (post_6c$b_k_trtWest < post_6c$b_k_trtWestHeat  )) / nrow(post_6c)
[1] 1
---
title: "Style growth models"
output: html_notebook
---

```{r}
library(tidyverse)
library(skimr)
library(brms)
```

## Data import and wrangle

```{r}
# Read the raw style-elongation measurements and fill in every
# day x floret x treatment x time combination (absent rows become NA).
style <- complete(
  read_csv("../input/Styleelongation2017_ZTslh.csv"),
  day, floret, treatment, time
)
```
```{r}
# Shorter column names, elapsed time in hours, and a per-floret series id.
style <- style %>%
  rename(trt = treatment, length = `length (mm)`) %>%
  mutate(
    hour = as.numeric(time - min(time)) / 3600,  # elapsed hours, first hour = 0
    id   = str_c(floret, day, trt, sep = "-")    # unique id for each floret
  )

# Quick overview (trt as factor so skim reports level counts).
style %>%
  mutate(trt = as.factor(trt)) %>%
  skim()
unique(style$trt)
```

Plot to see what we have:
```{r}
# Raw trajectories: one panel per day, point shape identifies the floret.
ggplot(style, aes(x = time, y = length, color = trt,
                  shape = as.character(floret))) +
  geom_point() +
  facet_wrap(~day)
```

remove a few missing data rows that are not at the end of the series
```{r}
# Drop one known-bad observation (day 1, floret 3, East, hour 3.5) and any
# missing measurements occurring before hour 2 (i.e. not end-of-series NAs).
style <- style %>%
  filter(!(day == 1 & floret == 3 & trt == "East" & hour == 3.5)) %>%
  filter(!(is.na(length) & hour < 2))
```

Fix height to max height.  This is necessary because our growth model can't account for shrinkage after reaching max height.  Fill in missing end-of-series values, but note that they are censored.
```{r}
# Flag missing lengths as right-censored, then hold each floret at its maximum:
# once the per-id maximum is reached, later shorter or missing values are
# replaced by that maximum (the growth model cannot represent shrinkage).
style <- style %>% 
  mutate(censored = ifelse(is.na(length), "right", "none")) %>%
  group_by(id) %>%
  arrange(hour) %>% 
  # which.max() skips NAs and returns the first index of the maximum; only rows
  # after that index are overwritten.  NOTE(review): row_number() here relies on
  # arrange(hour) having put each id's rows in hour order — TODO confirm.
  mutate(length2=ifelse((length < max(length, na.rm = TRUE) | is.na(length)) & row_number() > which.max(length),
                       max(length, na.rm = TRUE), length)) %>%
  ungroup() %>%
  filter(!is.na(length2))
```

Plot smoothed data
```{r}
# Loess-smoothed elongation per treatment, one panel per day.
ggplot(style, aes(x = time, y = length2, color = trt)) +
  geom_smooth() +
  facet_wrap(~day)
```

And plot averaged over days:
```{r}
# Same smooth, pooled over days.
ggplot(style, aes(x = time, y = length2, color = trt)) +
  geom_smooth()
```

### prior predictions

To help figure out reasonable priors

```{r}
# Weibull growth curve: length at time `hour`, rising from Lmin toward the
# asymptote Lmax with rate k and shape delta.  Returns a tibble (hour, y).
weibull <- function(Lmin, Lmax, k, delta, hour) {
  tibble(
    hour = hour,
    y = Lmax - (Lmax - Lmin) * exp(-(k * hour)^delta)
  )
}
```

set up tibble with some priors

```{r}
# Shape of the exponential(rate = 1) prior density on [0, 1].
x <- seq(0, 1, by = 0.01)
plot(x, dexp(x, rate = 1), type = "l")
```

k only 
```{r}
# Prior-predictive curves varying k only (Lmin, Lmax, delta held fixed).
# The original clamped delta at 0 with ifelse(), but with delta fixed at 1
# that was a no-op, so it is removed here.
tibble(
  Lmin = 7.5,   # rnorm(50, 7.5, 1)
  Lmax = 11.5,  # rnorm(50, 11.5, 1)
  k = rexp(50, 1),
  delta = 1     # rnorm(50, 1, .5)
) %>%
  mutate(prediction = pmap(list(Lmin, Lmax, k, delta), weibull, seq(0, 4.5, .1)),
         sample = row_number()) %>%
  unnest(prediction) %>%
  ggplot(aes(x = hour, y = y, group = sample)) +
  geom_line(alpha = .2)
```
delta only
```{r}
# Prior-predictive curves varying delta only (k fixed at 0.5).
tibble(
  Lmin = 7.5,   # rnorm(50, 7.5, 1)
  Lmax = 11.5,  # rnorm(50, 11.5, 1)
  k = .5,
  delta = rexp(50, .5)
) %>%
  mutate(prediction = pmap(list(Lmin, Lmax, k, delta), weibull, seq(0, 4.5, .1)),
         sample = row_number()) %>%
  unnest(prediction) %>%
  ggplot(aes(x = hour, y = y, group = sample)) +
  geom_line(alpha = .2)
```

delta and k
```{r}
# Prior-predictive curves varying both k and delta (endpoints fixed).
tibble(
  Lmin = 7.5,   # rnorm(50, 7.5, 1)
  Lmax = 11.5,  # rnorm(50, 11.5, 1)
  k = rexp(100, 1),
  delta = rexp(100, .5)
) %>%
  mutate(prediction = pmap(list(Lmin, Lmax, k, delta), weibull, seq(0, 4.5, .1)),
         sample = row_number()) %>%
  unnest(prediction) %>%
  ggplot(aes(x = hour, y = y, group = sample)) +
  geom_line(alpha = .2)
```

all together
```{r}
# Joint prior-predictive: all four parameters drawn from their priors.
tibble(
  Lmin = rnorm(100, 7.5, .25),
  Lmax = rnorm(100, 11.5, .25),
  k = rexp(100, 1),
  delta = rexp(100, 0.5)
) %>%
  mutate(prediction = pmap(list(Lmin, Lmax, k, delta), weibull, seq(0, 4.5, .1)),
         sample = row_number()) %>%
  unnest(prediction) %>%
  ggplot(aes(x = hour, y = y, group = sample)) +
  geom_line(alpha = .2)
```

# models

## priors

what priors are available to be set?

```{r}
# List every prior slot brms exposes for the full nonlinear Weibull model,
# to decide which priors must be set explicitly below.
get_prior(bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin ~ 1, 
    delta + k ~ trt + (1|id),
    nl=TRUE ),
  data=style)
```

## minimal 

```{r}
# Priors centred on the observed start/end lengths; exponential priors keep
# k and delta positive (delta = 1 reduces the Weibull to an exponential).
prior1 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k", lb=0),
            prior(exponential(.5),nlpar="delta", lb=0) # 1 is exponential model
)

# Minimal model: a single set of Weibull parameters shared by all treatments,
# days, and florets; right-censored rows handled via cens(censored).
fit1c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin + k + delta ~ 1, #minimal model, parameters do not vary for treatment or random effects
    nl=TRUE),
  data=style,
  prior=prior1,
  sample_prior = "yes",
  cores=4,
  chains=4)
gc()
fit1c <- add_criterion(fit1c, "loo")  # cache LOO for model comparison below
```


```{r}
summary(fit1c)  # posterior summaries and convergence diagnostics
gc()
```

```{r}
plot(fit1c)  # trace and density plots
gc()
```

```{r}
pairs(fit1c)  # pairwise posterior scatter of the parameters
gc()
```

```{r}
pred1c <- predict(fit1c)  # per-row posterior predictive mean and intervals
gc()
```

```{r}
# Attach the observed data to the predictions, then compare: smoothed data,
# 95% predictive ribbon, and the posterior-mean curve (yellow).
pred1c <- cbind(pred1c, style)

ggplot(pred1c, aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  geom_ribbon(aes(ymin = Q2.5, ymax = Q97.5),
              color = "grey90", fill = "black", alpha = .1) +
  geom_line(aes(y = Estimate), color = "yellow")
```

```{r}
pp_check(fit1c)
```

## fit2c:

```{r, eval=TRUE}

# delta varies by treatment with a per-floret random intercept; k, Lmin, Lmax
# stay shared.  No lb on the delta intercept prior — see the brm() comment.
prior2 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k", lb=0),
            prior(exponential(0.5),nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.5), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.5), nlpar="delta", coef="trtWestHeat")
)

fit2c <- brm( # ignore the warning about lower bounds.  If we set it for the intercept, it impacts the other delta coefficients and we don't want that.
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin + k ~ 1, 
    delta ~ trt + (1|id),   # 
    nl=TRUE ),
  data=style,
  prior=prior2,
  sample_prior = "yes",
  cores=4,
  chains=4,
  iter=5000 #,
  #control=list(adapt_delta=0.9)
  )

fit2c <- add_criterion(fit2c, "loo")  # cache LOO for comparison

gc()
```

```{r}
summary(fit2c)  # posterior summaries and convergence diagnostics
gc()
```


```{r, fig.height=10}
plot(fit2c, ask = FALSE)  # trace and density plots
gc()
```

```{r}
plot(conditional_effects(fit2c), ask=FALSE)  # marginal effect of trt
pp_check(fit2c)  # censored responses are not shown
```

```{r}
# Observed (solid smooth) vs fitted (dashed smooth) trajectories by treatment.
pred2c <- predict(fit2c)
cbind(pred2c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1)
```

```{r}
# Same comparison, one panel per day.
cbind(pred2c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1) +
  facet_wrap(~day, scales = "free_y")
```


```{r}
# Per-day mean observed (points) vs mean fitted (dashed line) trajectories.
# .groups = "drop" returns an ungrouped frame and silences the
# "grouped output" message.
pred2c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
```

```{r}
loo_compare(fit1c, fit2c)
```

Fit2c is preferred, but predictions are not that good relative to observed

```{r}
# Two-sided tests: are the treatment effects on delta zero, and equal?
hypothesis(fit2c, c("delta_trtWest  = 0",
                    "delta_trtWestHeat = 0",
                    "delta_trtWest = delta_trtWestHeat"))
```

## fit3c:

```{r, eval=TRUE}
# Starting values near the prior means; zero offsets for the (1|id) term.
# NOTE(review): z_1 is initialised with dim = c(72, 1362) here, while the
# analogous inits4/inits5/inits6 below use c(72, 1) for the same (1|id) term
# (72 ids); 1362 is the number of observations, which looks like a
# copy-paste slip — TODO confirm against the generated Stan data.
inits3 <- function() { list(
  b_Lmax=rnorm(1, 11.5, .25),
  b_Lmin=rnorm(1, 7.5, .25),
  b_delta=rexp(1, .5),
  b_k=c(rexp(1, 1), 0, 0),
  z_1=array(0, dim=c(72,1362))
)
}

# k varies by treatment (tight normal(0, 0.02) contrasts) with a per-floret
# random intercept; Lmin, Lmax, delta stay shared.
prior3 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.02), nlpar="k", coef="trtWest"),
            prior(normal(0,0.02), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5),nlpar="delta") 
)

fit3c <- brm( # ignore the warning about k. 
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin + delta ~ 1, 
    k ~ trt + (1|id),   # 
    nl=TRUE ),
  data=style,
  prior=prior3,
  sample_prior = "yes",
  inits = inits3,
  cores=4,
  chains=4,
  iter=5000,
  control=list(adapt_delta=0.99)  # high adapt_delta to suppress divergences
  )

fit3c <- add_criterion(fit3c, "loo")

gc()
```

```{r}
summary(fit3c)  # posterior summaries and convergence diagnostics
gc()
```

```{r}
pairs(fit3c, pars="^b_")  # population-level parameters only
```


```{r, fig.height=10}
plot(fit3c, ask = FALSE)  # trace and density plots
gc()
```

```{r}
plot(conditional_effects(fit3c), ask=FALSE)  # marginal effect of trt
pp_check(fit3c)  # censored responses are not shown
```

```{r}
# Observed (solid) vs fitted (dashed) smooths, pooled over days.
pred3c <- predict(fit3c)
cbind(pred3c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1)
```

```{r}
# Same comparison, one panel per day.
cbind(pred3c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1) +
  facet_wrap(~day, scales = "free_y")
```


```{r}
# Per-day mean observed (points) vs mean fitted (dashed) trajectories.
# .groups = "drop" returns an ungrouped frame and silences the message.
pred3c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
```

```{r}
loo_compare(fit1c, fit2c, fit3c)
```


## fit_4c: k and delta ~ trt.  

```{r, eval=TRUE}
# Starting values near the prior means; zero offsets for the two (1|id)
# terms (72 ids each, for delta and k).
inits4 <- function() { list(
  b_Lmax=rnorm(1, 11.5, .25),
  b_Lmin=rnorm(1, 7.5, .25),
  b_delta=c(rexp(1, .5), 0, 0),
  b_k=c(rexp(1, 1), 0, 0),
  z_1=array(0, dim=c(72,1)),
  z_2=array(0, dim=c(72,1))
)
}

# Both k and delta vary by treatment (tight contrast priors) with per-floret
# random intercepts; Lmin and Lmax stay shared.
prior4 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.02), nlpar="k", coef="trtWest"),
            prior(normal(0,0.02), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5), nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.25), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.25), nlpar="delta", coef="trtWestHeat")
)

fit_4c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin ~ 1, 
    delta + k ~ trt + (1|id),   # 
    nl=TRUE ),
  data=style,
  prior=prior4,
  inits=inits4,
  cores=4,
  chains=4,
  iter=10000, control = list(adapt_delta=0.95)  # long run for this harder posterior
) 

fit_4c <- add_criterion(fit_4c, "loo")

gc()
```

```{r}
summary(fit_4c)  # posterior summaries and convergence diagnostics
gc()
```

```{r}
pairs(fit_4c, pars = "^b_")  # population-level parameters only
```


```{r, fig.height=10}
plot(fit_4c, ask = FALSE)  # trace and density plots
gc()
```

```{r}
plot(conditional_effects(fit_4c), ask=FALSE)  # marginal effect of trt
pp_check(fit_4c)  # censored responses are not shown
```

```{r}
# Observed (solid) vs fitted (dashed) smooths, pooled over days.
pred_4c <- predict(fit_4c)
cbind(pred_4c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1)
```

```{r}
# Same comparison, one panel per day.
cbind(pred_4c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1) +
  facet_wrap(~day, scales="free_y")
```


```{r}
# Mean observed vs mean fitted trajectory per treatment (pooled over days).
# .groups = "drop" returns an ungrouped frame and silences the message.
pred_4c %>%
  cbind(style) %>%
  group_by(trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_line() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) 
```

```{r}
# Per-day mean observed (points) vs mean fitted (dashed) trajectories.
# .groups = "drop" returns an ungrouped frame and silences the message.
pred_4c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
```

```{r}
loo_compare(fit1c, fit2c, fit3c, fit_4c)  # top row is preferred
```

```{r}
save.image(file="../output/models.Rdata")  # checkpoint all fits so far
```


## fit_5c: k + delta ~ trt + (1|id); Lmax ~ 1|day

```{r, eval=TRUE}
# Starting values: z_1 is the (1|day) Lmax term (8 days); z_2/z_3 are the
# (1|id) terms for delta and k (72 ids).
inits5 <- function() { list(
  b_Lmax=rnorm(1, 11.5, .25),
  b_Lmin=rnorm(1, 7.5, .25),
  b_delta=c(rexp(1, .5), 0, 0),
  b_k=c(rexp(1, 1), 0, 0),
  z_1=array(0, dim=c(8,1)),
  z_2=array(0, dim=c(72,1)),
  z_3=array(0, dim=c(72,1))
)
}

# Same priors as fit_4c.
prior5 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.02), nlpar="k", coef="trtWest"),
            prior(normal(0,0.02), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5), nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.25), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.25), nlpar="delta", coef="trtWestHeat")
)

# As fit_4c, plus a day-level random intercept on Lmax.
fit_5c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmin ~ 1, 
    Lmax ~ (1|day),
    delta + k ~ trt + (1|id),   # 
    nl=TRUE ),
  data=style,
  prior=prior5,
  sample_prior = "yes",
  inits=inits5,
  cores=4,
  chains=4,
  iter=2000
) 

fit_5c <- add_criterion(fit_5c, "loo")

gc()
```

```{r}
summary(fit_5c)  # posterior summaries and convergence diagnostics
gc()
```

```{r, fig.height=10}
plot(fit_5c, ask = FALSE)  # trace and density plots
gc()
```

```{r}
plot(conditional_effects(fit_5c), ask=FALSE)  # marginal effect of trt
pp_check(fit_5c)  # censored responses are not shown
```

```{r}
# Observed (solid) vs fitted (dashed) smooths, pooled over days.
pred_5c <- predict(fit_5c)
cbind(pred_5c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1)
```

```{r}
# Same comparison, one panel per day (shared y scale here).
cbind(pred_5c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1) +
  facet_wrap(~day)
```


```{r}
# Mean observed vs mean fitted trajectory per treatment (pooled over days).
# .groups = "drop" returns an ungrouped frame and silences the message.
pred_5c %>%
  cbind(style) %>%
  group_by(trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_line() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) 
```

```{r}
# Per-day mean observed (points) vs mean fitted (dashed) trajectories.
# .groups = "drop" returns an ungrouped frame and silences the message.
pred_5c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length2 = mean(length2),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length2, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y")
```

```{r}
loo_compare(fit1c, fit2c, fit3c, fit_4c, fit_5c)
```


## fit_6c:

```{r, eval=TRUE}

# Starting values: z_1/z_2 are the (1|day) terms for Lmax and Lmin (8 days);
# z_3/z_4 are the (1|id) terms for delta and k (72 ids).
inits6 <- function() { list(
  b_Lmax=rnorm(1, 11.5, .25),
  b_Lmin=rnorm(1, 7.5, .25),
  b_delta=c(rexp(1, .5), 0, 0),
  b_k=c(rexp(1, 1), 0, 0),
  z_1=array(0, dim=c(8,1)),
  z_2=array(0, dim=c(8,1)),
  z_3=array(0, dim=c(72,1)),
  z_4=array(0, dim=c(72,1))
)
}

# As prior5, but the k treatment contrasts are widened to sd 0.04.
prior6 <- c(prior(normal(7.5,.25), nlpar="Lmin"),
            prior(normal(11.5, .25),nlpar="Lmax"),
            prior(exponential(1),nlpar="k"),
            prior(normal(0,0.04), nlpar="k", coef="trtWest"),
            prior(normal(0,0.04), nlpar="k", coef="trtWestHeat"),
            prior(exponential(0.5), nlpar="delta"), # 1 is exponential model
            prior(normal(0,0.25), nlpar="delta", coef="trtWest"),
            prior(normal(0,0.25), nlpar="delta", coef="trtWestHeat")
)

# Day-level random intercepts on BOTH Lmax and Lmin, plus treatment effects
# and floret-level intercepts on delta and k.  The lower-bound warning is
# expected: bounding the k/delta intercept priors would also bound the
# treatment contrasts, which must be free to go negative.
fit_6c <- brm(
  formula=bf(
    length2 | cens(censored) ~ Lmax - (Lmax - Lmin) * exp(-(k*hour)^delta), #Weibull model
    Lmax + Lmin ~ (1|day),
    delta + k ~ trt + (1|id),   # 
    nl=TRUE ),
  data=style,
  prior=prior6,
  sample_prior = "yes",
  inits=inits6,
  cores=4,
  chains=4,
  iter=2000
  )

fit_6c <- add_criterion(fit_6c, "loo")

gc()
```


```{r}
summary(fit_6c)  # posterior summaries and convergence diagnostics
gc()
```

```{r, fig.height=10}
plot(fit_6c, ask = FALSE)  # trace and density plots
gc()
```

```{r}
plot(conditional_effects(fit_6c), ask=FALSE)  # marginal effect of trt
pp_check(fit_6c)  # censored responses are not shown
```

```{r}
# Observed (solid) vs fitted (dashed) smooths, pooled over days.
pred_6c <- predict(fit_6c)
cbind(pred_6c, style) %>%
  ggplot(aes(x = hour, y = length2, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1)
```

```{r}
# Fit vs RAW length (not the censoring-adjusted length2): end-of-series NAs
# in `length` trigger "removed rows" warnings — presumably intentional, to
# compare the fit against the uncorrected data (TODO confirm).
cbind(pred_6c, style) %>%
  ggplot(aes(x = hour, y = length, color = trt)) +
  geom_smooth(span = 1) +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_smooth(aes(y = Estimate), linetype = "dashed", span = 1) +
  facet_wrap(~day, scales = "free_y")
```

```{r}
# Final figure: per-day mean of raw length (points) vs mean fitted value
# (dashed).  mean(length) is NA wherever a cell contains a censored (missing)
# measurement, which is the source of the "removed rows" warning.
# .groups = "drop" returns an ungrouped frame and silences the message.
pred_6c %>%
  cbind(style) %>%
  group_by(day, trt, hour) %>%
  summarize(length = mean(length),
            Estimate = mean(Estimate),
            .groups = "drop") %>%
  ggplot(aes(x=hour, y=length, color=trt)) +
  geom_point() +
  #geom_ribbon(aes(ymin=Q2.5, ymax=Q97.5), color="grey90", fill="black", alpha=.1) +
  geom_line(aes(y=Estimate), lty=2) +
  facet_wrap(~day, scales = "free_y", ncol = 4) +
  theme(legend.position = "top")
ggsave("../output/style_fit_plot.pdf", width=6.5, height = 4)  # saves the last plot
```

```{r}
save.image("../output/StyleModelFits.Rdata")  # checkpoint all six fits
```


## compare models

```{r}
loo_compare(fit1c, fit2c, fit3c, fit_4c, fit_5c, fit_6c)  # top row is preferred
```


## posterior of best model

Get the posterior
```{r}
post_6c <- posterior_samples(fit_6c, pars="b.*")
```

posterior for inflection point
```{r}
# Time of the inflection point of the Weibull growth curve
# L(t) = Lmax - (Lmax - Lmin) * exp(-(k*t)^delta):
#   t* = (1/k) * ((delta - 1) / delta)^(1/delta)
# Vectorized over delta and k.  For delta <= 1 the curve has no interior
# inflection point; return NA there instead of the NaN (delta < 1, with a
# "NaNs produced" warning) or spurious 0 (delta == 1) of the raw formula.
infl.pt <- function(delta, k) {
  frac <- pmax((delta - 1) / delta, 0)  # clamp avoids negative^fraction -> NaN
  out <- (1 / k) * frac^(1 / delta)
  out[delta <= 1] <- NA_real_
  out
}

# Inflection-point posteriors per treatment: East is the intercept; West and
# WestHeat add their delta and k contrasts before applying infl.pt().
post_6c <- post_6c %>%
  mutate(
    infl_intercept   = infl.pt(b_delta_Intercept, b_k_Intercept),
    infl_trtWest     = infl.pt(b_delta_Intercept + b_delta_trtWest,
                               b_k_Intercept + b_k_trtWest),
    infl_trtWestHeat = infl.pt(b_delta_Intercept + b_delta_trtWestHeat,
                               b_k_Intercept + b_k_trtWestHeat)
  )

```


```{r}
# Posterior mean of every parameter (and derived inflection point).
# colMeans() replaces apply(x, 2, function(x) mean(x)): same named
# numeric vector, without coercing the data.frame through apply() or
# wrapping mean in a needless anonymous function.
colMeans(post_6c)
```

posterior probability that inflection point for trtWestHeat is greater than inflection point for East
```{r}
# Proportion of posterior draws where WestHeat's inflection point
# exceeds East's
mean(post_6c$infl_trtWestHeat > post_6c$infl_intercept)
```

posterior probability that inflection point for trtWest is greater than inflection point for East
```{r}
# Proportion of posterior draws where West's inflection point
# exceeds East's
mean(post_6c$infl_trtWest > post_6c$infl_intercept)
```

posterior probability that inflection point for trtWest is greater than inflection point for trtWestHeat
```{r}
# Proportion of posterior draws where West's inflection point
# exceeds WestHeat's
mean(post_6c$infl_trtWest > post_6c$infl_trtWestHeat)
```

posterior probability that k for trtWestHeat is less than East
```{r}
# Proportion of draws with a negative WestHeat effect on k, computed
# two algebraically equivalent ways as a sanity check, then the same
# question posed via brms's hypothesis() interface
mean(post_6c$b_k_trtWestHeat < 0)
mean(post_6c$b_k_trtWestHeat + post_6c$b_k_Intercept < post_6c$b_k_Intercept)
hypothesis(fit_6c, "k_trtWestHeat < 0")
```

posterior probability that k for trtWest is less than East
```{r}
# Proportion of draws with a negative West effect on k, computed two
# algebraically equivalent ways as a sanity check
mean(post_6c$b_k_trtWest < 0)
mean(post_6c$b_k_trtWest + post_6c$b_k_Intercept < post_6c$b_k_Intercept)
```

### Plots

k
```{r, fig.width=3}
# Posterior mean and 95% HPDI of k for each treatment (the West and
# WestHeat effects are added to the East intercept before summarizing),
# plotted as bars with error bars and saved to PDF.
post_6c %>%
  as.data.frame() %>%
  select(starts_with("b_k")) %>%
  rename_with(~str_remove(.,"b_")) %>%
  rename_with(~str_remove(.,"trt")) %>%
  rename(k_East=k_Intercept) %>%
  mutate(k_West=k_West+k_East,
         k_WestHeat=k_WestHeat+k_East) %>%
  # Fixed: across() now requires an explicit .cols selection; relying on
  # the old everything() default is deprecated in current dplyr
  summarize(across(everything(),
                   .fns = list(mean = mean,
                               lower = ~ rethinking::HPDI(., 0.95)[1],
                               upper = ~ rethinking::HPDI(., 0.95)[2])
                   )) %>%
  pivot_longer(cols=everything(), names_to=c("trt", "parameter"), names_pattern = "k_(.*)_(.*)") %>%
  pivot_wider(names_from = "parameter", values_from = "value") %>%
  ggplot(aes(x=trt, y=mean, ymin=lower, ymax=upper)) +
  geom_col(fill="skyblue") + 
  geom_errorbar(width=.5) +
  ylab("k") +
  xlab("Condition") +
  theme_bw()
ggsave("../output/style_k_plot.pdf", height=3, width = 3)
```
inflection
```{r, fig.width=3}
# Posterior mean and 95% HPDI of the inflection point per treatment,
# plotted as bars with error bars and saved to PDF.
post_6c %>%
  as.data.frame() %>%
  select(starts_with("infl")) %>%
  rename_with(~str_remove(.,"trt")) %>%
  rename_with(~str_remove(.,"infl_")) %>%
  rename(East=intercept) %>%
  # Fixed: across() now requires an explicit .cols selection; relying on
  # the old everything() default is deprecated in current dplyr
  summarize(across(everything(),
                   .fns = list(mean = mean,
                               lower = ~ rethinking::HPDI(., 0.95)[1],
                               upper = ~ rethinking::HPDI(., 0.95)[2])
                   )) %>%
  pivot_longer(cols=everything(), names_to=c("trt", "parameter"), names_pattern = "(.*)_(.*)") %>%
  pivot_wider(names_from = "parameter", values_from = "value") %>%
  ggplot(aes(x=trt, y=mean, ymin=lower, ymax=upper)) +
  geom_col(fill="red") + 
  geom_errorbar(width=.5) +
  ylab("Inflection Point") +
  xlab("Condition") +
  theme_bw()
ggsave("../output/style_inflection_plot.pdf", height=3, width = 3)
```



posterior probability that k for trtWest is less than k for trtWestHeat
```{r}
# Proportion of posterior draws where the West effect on k is smaller
# than the WestHeat effect
mean(post_6c$b_k_trtWest < post_6c$b_k_trtWestHeat)
```
